#include <asm/hypervisor.h>
#include <xen/balloon.h>
#include <xen/interface/memory.h>
+#include <asm/maddr.h>
+#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
+#include <linux/highmem.h>
#include <linux/list.h>
#include <xen/xenbus.h>
#include "common.h"
+#ifndef CONFIG_XEN
+#define scrub_pages(_p,_n)
+#endif
+
#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *balloon_pde;
#endif
set_phys_to_machine(pfn, frame_list[i]);
+#ifdef CONFIG_XEN
/* Link back into the page tables if not highmem. */
if (pfn < max_low_pfn) {
int ret;
0);
BUG_ON(ret);
}
+#endif
/* Relinquish the page back to the allocator. */
ClearPageReserved(page);
return 0;
}
+extern void xen_invalidate_foreign_mappings(void);
+
static int decrease_reservation(unsigned long nr_pages)
{
unsigned long pfn, i, flags;
(unsigned long)v, __pte_ma(0), 0);
BUG_ON(ret);
}
-#ifdef CONFIG_XEN_SCRUB_PAGES
+#ifdef CONFIG_XEN
else {
v = kmap(page);
scrub_pages(v, 1);
#endif
}
+#ifdef CONFIG_XEN
/* Ensure that ballooned highmem pages don't have kmaps. */
kmap_flush_unused();
flush_tlb_all();
+#endif
balloon_lock(flags);
/* No more mappings: invalidate P2M and add to balloon. */
for (i = 0; i < nr_pages; i++) {
pfn = mfn_to_pfn(frame_list[i]);
+#ifdef CONFIG_XEN
set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+#endif
balloon_append(pfn_to_page(pfn));
}
+ xen_invalidate_foreign_mappings();
set_xen_guest_handle(reservation.extent_start, frame_list);
reservation.nr_extents = nr_pages;
ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
static int __init balloon_init(void)
{
-#ifdef CONFIG_X86
+#if defined(CONFIG_X86) && defined(CONFIG_XEN)
unsigned long pfn;
struct page *page;
#endif
IPRINTK("Initialising balloon driver.\n");
+#ifdef CONFIG_XEN
bs.current_pages = min(xen_start_info->nr_pages, max_pfn);
totalram_pages = bs.current_pages;
+#else
+ bs.current_pages = totalram_pages;
+#endif
bs.target_pages = bs.current_pages;
bs.balloon_low = 0;
bs.balloon_high = 0;
#endif
balloon_sysfs_init();
-#ifdef CONFIG_X86
+#if defined(CONFIG_X86) && defined(CONFIG_XEN)
/* Initialise the balloon with excess memory space. */
for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
page = pfn_to_page(pfn);
subsys_initcall(balloon_init);
+/*
+ * Module unload hook.  Currently a stub: pages already ballooned out
+ * are NOT reclaimed on unload (see the XXX below).
+ */
+static void balloon_exit(void)
+{
+    /* XXX - release balloon here */
+    return;
+}
+
+module_exit(balloon_exit);
+
void balloon_update_driver_allowance(long delta)
{
unsigned long flags;
balloon_unlock(flags);
}
+#ifdef CONFIG_XEN
static int dealloc_pte_fn(
pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
{
BUG_ON(ret != 1);
return 0;
}
+#endif
struct page **alloc_empty_pages_and_pagevec(int nr_pages)
{
if (ret == 1)
ret = 0; /* success */
} else {
+#ifdef CONFIG_XEN
ret = apply_to_page_range(&init_mm, vaddr, PAGE_SIZE,
dealloc_pte_fn, NULL);
+#else
+ /* cannot handle non-auto translate mode */
+ ret = 1;
+#endif
}
if (ret != 0) {
out:
schedule_work(&balloon_worker);
+#ifdef CONFIG_XEN
flush_tlb_all();
+#endif
return pagevec;
err:
*/
#include <linux/capability.h>
+#include <linux/errno.h>
#include <linux/stat.h>
#include <linux/sysdev.h>
#include "common.h"
static unsigned int __max_nr_grant_frames(void)
{
struct gnttab_query_size query;
- int rc;
+ int rc = -1;
query.dom = DOMID_SELF;
+#ifdef CONFIG_XEN
rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
+#endif
if ((rc < 0) || (query.status != GNTST_okay))
return 4; /* Legacy max supported number of frames */
+#include <linux/module.h>
#include <linux/proc_fs.h>
#include <xen/xen_proc.h>
return create_proc_entry(name, mode, xen_base);
}
+EXPORT_SYMBOL_GPL(create_xen_proc_entry);
+
void remove_xen_proc_entry(const char *name)
{
remove_proc_entry(name, xen_base);
}
+
+EXPORT_SYMBOL_GPL(remove_xen_proc_entry);
obj-m += platform-pci/
obj-m += xenbus/
+obj-m += balloon/
obj-m += blkfront/
obj-m += netfront/
obj-m += util/
--- /dev/null
+include $(M)/overrides.mk
+
+obj-m = xen-balloon.o
+
+EXTRA_CFLAGS += -I$(M)/platform-pci
+
+xen-balloon-objs =
+xen-balloon-objs += balloon.o
+xen-balloon-objs += sysfs.o
--- /dev/null
+ifneq ($(KERNELRELEASE),)
+include $(src)/Kbuild
+endif
if ! echo $d | egrep -q back; then
lndir $d $(basename $d) > /dev/null 2>&1
fi
+ if ! echo $d | egrep -q ball; then
+ lndir $d $(basename $d) > /dev/null 2>&1
+ fi
done
ln -sf ${XL}/drivers/xen/core/gnttab.c platform-pci
ln -sf ${XL}/include/asm-i386/mach-xen/asm/hypercall.h include/asm
ln -sf ${XL}/include/asm-i386/mach-xen/asm/synch_bitops.h include/asm
ln -sf ${XL}/include/asm-i386/mach-xen/asm/maddr.h include/asm
+ ln -sf ${XL}/include/asm-i386/mach-xen/asm/page.h include/asm
;;
"ia64")
ln -sf ${XL}/include/asm-ia64/hypervisor.h include/asm
}
EXPORT_SYMBOL(xen_machphys_update);
-void balloon_update_driver_allowance(long delta)
-{
-}
-EXPORT_SYMBOL(balloon_update_driver_allowance);
-
-void balloon_release_driver_page(struct page *page)
-{
-}
-EXPORT_SYMBOL(balloon_release_driver_page);
HYPERCALL(hvm_op)
};
-void hvm_do_hypercall(struct cpu_user_regs *pregs)
+int hvm_do_hypercall(struct cpu_user_regs *pregs)
{
if ( unlikely(ring_3(pregs)) )
{
pregs->eax = -EPERM;
- return;
+ return 0;
}
if ( (pregs->eax >= NR_hypercalls) || !hvm_hypercall_table[pregs->eax] )
current->domain->domain_id, current->vcpu_id,
pregs->eax);
pregs->eax = -ENOSYS;
- return;
+ return 0;
}
+    /* Install a canary value in regs->eip so we can check for continuation */
+ pregs->eip |= 0xF;
+
pregs->eax = hvm_hypercall_table[pregs->eax](
pregs->ebx, pregs->ecx, pregs->edx, pregs->esi, pregs->edi);
+
+    /* XXX: put a fake IO instr here to inform the emulator to flush the mapcache */
+
+ if( (pregs->eip & 0xF) == 0 ) /* preempted */
+ return 1;
+
+ return 0;
}
#else /* defined(__x86_64__) */
HYPERCALL(event_channel_op)
};
-void hvm_do_hypercall(struct cpu_user_regs *pregs)
+int hvm_do_hypercall(struct cpu_user_regs *pregs)
{
if ( unlikely(ring_3(pregs)) )
{
pregs->rax = -EPERM;
- return;
+ return 0;
}
pregs->rax = (uint32_t)pregs->eax; /* mask in case compat32 caller */
current->domain->domain_id, current->vcpu_id,
pregs->rax);
pregs->rax = -ENOSYS;
- return;
+ return 0;
}
if ( current->arch.paging.mode->guest_levels == 4 )
(uint32_t)pregs->esi,
(uint32_t)pregs->edi);
}
+ return 0; /* XXX SMH: fix for preempt here */
}
#endif /* defined(__x86_64__) */
pending = (vcpu_info(v, evtchn_upcall_pending) || cpu_has_pending_irq(v));
if ( unlikely(pending) )
- {
- struct cpu_user_regs regs;
- hvm_store_cpu_guest_regs(v, ®s, NULL);
- pending = !irq_masked(regs.eflags);
- }
+ pending = hvm_interrupts_enabled(v);
return pending;
}
return (eflags & X86_EFLAGS_VM) || !(cr0 & X86_CR0_PE);
}
+/*
+ * Report whether the guest vCPU can currently take interrupts,
+ * i.e. IF is not masked in the VMCB-held RFLAGS.
+ * Returns non-zero when interrupts are deliverable.
+ */
+static int svm_interrupts_enabled(struct vcpu *v)
+{
+    unsigned long eflags = v->arch.hvm_svm.vmcb->rflags;
+    return !irq_masked(eflags);
+}
+
static int svm_guest_x86_mode(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
.paging_enabled = svm_paging_enabled,
.long_mode_enabled = svm_long_mode_enabled,
.pae_enabled = svm_pae_enabled,
+ .interrupts_enabled = svm_interrupts_enabled,
.guest_x86_mode = svm_guest_x86_mode,
.get_guest_ctrl_reg = svm_get_ctrl_reg,
.get_segment_base = svm_get_segment_base,
inst_len = __get_instruction_length(v, INSTR_VMCALL, NULL);
ASSERT(inst_len > 0);
HVMTRACE_1D(VMMCALL, v, regs->eax);
- __update_guest_eip(vmcb, inst_len);
- hvm_do_hypercall(regs);
+ if(hvm_do_hypercall(regs) == 0) /* not preempted */
+ __update_guest_eip(vmcb, inst_len);
break;
case VMEXIT_CR0_READ:
return (vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE));
}
+/*
+ * Report whether the guest vCPU can currently take interrupts,
+ * reading RFLAGS out of the VMCS.
+ * NOTE(review): __vmread presumably requires v's VMCS to be the one
+ * currently loaded (v == current) — confirm against callers.
+ */
+static int vmx_interrupts_enabled(struct vcpu *v)
+{
+    unsigned long eflags = __vmread(GUEST_RFLAGS);
+    return !irq_masked(eflags);
+}
+
+
static void vmx_update_host_cr3(struct vcpu *v)
{
ASSERT( (v == current) || !vcpu_runnable(v) );
.paging_enabled = vmx_paging_enabled,
.long_mode_enabled = vmx_long_mode_enabled,
.pae_enabled = vmx_pae_enabled,
+ .interrupts_enabled = vmx_interrupts_enabled,
.guest_x86_mode = vmx_guest_x86_mode,
.get_guest_ctrl_reg = vmx_get_ctrl_reg,
.get_segment_base = vmx_get_segment_base,
{
HVMTRACE_1D(VMMCALL, v, regs->eax);
inst_len = __get_instruction_length(); /* Safe: VMCALL */
- __update_guest_eip(inst_len);
- hvm_do_hypercall(regs);
+ if(hvm_do_hypercall(regs) == 0) /* not preempted */
+ __update_guest_eip(inst_len);
break;
}
case EXIT_REASON_CR_ACCESS:
if ( unlikely((page->count_info & PGC_count_mask) != 1) )
{
shadow_drop_references(d, page);
- /* We'll make this a guest-visible error in future, so take heed! */
- if ( (page->count_info & PGC_count_mask) != 1 )
- gdprintk(XENLOG_INFO, "Dom%d freeing in-use page %lx "
- "(pseudophys %lx): count=%lx type=%lx\n",
- d->domain_id, mfn, get_gpfn_from_mfn(mfn),
- (unsigned long)page->count_info, page->u.inuse.type_info);
+ /* NB: still may have foreign references to the page at this stage */
}
guest_physmap_remove_page(d, gmfn, mfn);
* 1) determine whether paging is enabled,
* 2) determine whether long mode is enabled,
* 3) determine whether PAE paging is enabled,
- * 4) determine the mode the guest is running in,
- * 5) return the current guest control-register value
- * 6) return the current guest segment descriptor base
+ * 4) determine whether interrupts are enabled or not,
+ * 5) determine the mode the guest is running in,
+ * 6) return the current guest control-register value
+ * 7) return the current guest segment descriptor base
*/
int (*paging_enabled)(struct vcpu *v);
int (*long_mode_enabled)(struct vcpu *v);
int (*pae_enabled)(struct vcpu *v);
+ int (*interrupts_enabled)(struct vcpu *v);
int (*guest_x86_mode)(struct vcpu *v);
unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);
unsigned long (*get_segment_base)(struct vcpu *v, enum x86_segment seg);
return hvm_funcs.pae_enabled(v);
}
+/* Dispatch to the vendor-specific (SVM/VMX) interrupts_enabled hook. */
+static inline int
+hvm_interrupts_enabled(struct vcpu *v)
+{
+    return hvm_funcs.interrupts_enabled(v);
+}
+
static inline int
hvm_guest_x86_mode(struct vcpu *v)
{
void hvm_print_line(struct vcpu *v, const char c);
void hlt_timer_fn(void *data);
-void hvm_do_hypercall(struct cpu_user_regs *pregs);
+int hvm_do_hypercall(struct cpu_user_regs *pregs);
void hvm_hlt(unsigned long rflags);
void hvm_triple_fault(void);